In [1]:
from __future__ import division

from jedi import jedi

import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint, ode
from numpy import zeros, ones, eye, tanh, dot, outer, sqrt, linspace, \
    cos, pi, hstack, zeros_like, abs, repeat
from numpy.random import uniform, normal, choice
%config InlineBackend.figure_format = 'retina'
%matplotlib inline

Simulation Parameters


In [2]:
dt = 1       # integration time step
tmax = 800   # simulation length
tstop = 300  # time at which learning is switched off

N = 300                             # network size
J = normal(0, sqrt(1 / N), (N, N))  # recurrent weight matrix
x0 = uniform(-0.5, 0.5, N)          # initial state

g = 1.5                # gain on the recurrent connections
u = uniform(-1, 1, N)  # feedback vector
w = uniform(-1 / sqrt(N), 1 / sqrt(N), N)  # initial readout weights
lr = 1.0  # learning rate

# Mass model
# Note: tanh_x must be passed in so that it
# can be updated locally at each step.

def model(t0, x, tanh_x, w):
    return -x + g * dot(J, tanh_x) + dot(w, tanh_x) * u

# target pattern
target = lambda t0: cos(2 * pi * t0 / 50)
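
The readout is z(t) = dot(w, tanh(x)), and the dot(w, tanh_x) * u term feeds it back into the network. As a rough illustration of how this model can be stepped forward with the scipy.integrate.ode solver imported above (a sketch only, jedi's own integration loop may differ), the extra arguments tanh_x and w are threaded through set_f_params and refreshed at every step:

solver = ode(model)
solver.set_integrator('dopri5')
solver.set_initial_value(x0, 0)
solver.set_f_params(tanh(x0), w)

xs = [x0]
t = 0
while solver.successful() and t < 50:
    t += dt
    x = solver.integrate(t)          # advance the state by one step
    solver.set_f_params(tanh(x), w)  # refresh the rates for the next step
    xs.append(x)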

In [3]:
def error_signal_plot(t, z, wu):
    plt.figure(figsize=(10, 5))
    plt.subplot(2, 1, 1)
    plt.plot(t, target(t), '-r', lw=2)
    plt.plot(t, z, '-b')
    plt.legend(('target', 'output'))
    plt.ylim([-1.1, 3])
    plt.xticks([])
    plt.subplot(2, 1, 2)
    plt.plot(t, wu, '-k')
    plt.yscale('log')
    plt.ylabel(r'$|\Delta w|$', fontsize=20)
    plt.xlabel('time', fontweight='bold', fontsize=16)
    plt.show()

FORCE


In [7]:
X = jedi.force(target, model, lr, dt, tmax, tstop, x0, w)
error_signal_plot(X[1], X[2], X[4])
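
FORCE (Sussillo & Abbott, Neuron 2009) trains the readout weights online with recursive least squares (RLS) while the network runs, which keeps the output clamped near the target from the start. A minimal sketch of the core update, written independently of jedi's implementation (P is the running estimate of the inverse rate-correlation matrix):

P = eye(N) / lr  # inverse-correlation estimate, initialized to I / lr

def force_step(t0, x, w, P):
    r = tanh(x)                               # firing rates
    z = dot(w, r)                             # current readout
    e = z - target(t0)                        # error against the target
    k = dot(P, r) / (1 + dot(r, dot(P, r)))   # RLS gain vector
    P -= outer(k, dot(r, P))                  # rank-1 update of P
    dw = -e * k                               # error-driven weight change
    return w + dw, P, z, abs(dw).sum()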


DFORCE


In [15]:
rho = repeat(0.05, N)  # per-unit DFORCE parameter (0.05 for every unit)
tstop = 500            # train for longer than in the FORCE run
lr = 0.4               # smaller learning rate

X = jedi.dforce(rho, target, model, lr, dt, tmax, tstop, x0, w)
error_signal_plot(X[1], X[2], X[4])
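
As a quick quantitative check (assuming, as the plotting calls above imply, that X[1] holds the time axis and X[2] the readout), the error after learning stops at tstop can be summarized as an RMSE:

t, z = X[1], X[2]
mask = t > tstop
rmse = sqrt(np.mean((z[mask] - target(t[mask])) ** 2))
print("post-tstop RMSE: %.3f" % rmse)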



In [17]:
# Bit Floppers --
# Sussillo D, Barak O:
# Opening the Black Box: Low-dimensional dynamics in high-dimensional recurrent neural networks. 
# Neural Computation. 25(3):626-649 (2013) 

dt = 1       # integration time step
tmax = 800   # simulation length
tstop = 300  # time at which learning is switched off

N = 300  # network size (assumed; must be > 0 for the 1/N scaling)
J = normal(0, sqrt(1 / N), (N, N))  # recurrent weight matrix
x0 = uniform(-0.5, 0.5, N)          # initial state

g = 1.5                # gain on the recurrent connections
u = uniform(-1, 1, N)  # feedback vector
w = uniform(-1 / sqrt(N), 1 / sqrt(N), N)  # initial readout weights
lr = 1.0  # learning rate

# Mass model
# Note: tanh_x must be passed in so that it
# can be updated locally at each step.

def model(t0, x, tanh_x, w):
    return -x + g * dot(J, tanh_x) + dot(w, tanh_x) * u

# target pattern
target = lambda t0: cos(2 * pi * t0 / 50)
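
Note that this cell only re-creates the sine-wave setup; in the flip-flop task of Sussillo & Barak the target instead holds the sign of the most recent input pulse. A minimal one-bit version of such a target signal (a sketch only; the pulse count and timing are arbitrary choices, not taken from the paper):

ts = linspace(0, tmax, int(tmax / dt) + 1)  # 801 time points
pulse_times = set(choice(ts[1:], size=10, replace=False))

flip_target = zeros_like(ts)
state = 1.0
for i, t0 in enumerate(ts):
    if t0 in pulse_times:
        state = choice([-1.0, 1.0])  # each pulse sets the held bit at random
    flip_target[i] = state           # the target holds the last pulse's sign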

